# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	ChangeSet	1.1095+1.1069.27.1 -> 1.1096
#	include/linux/acpi.h	1.17.1.3 -> 1.21
#	include/linux/sysctl.h	1.23.1.10 -> 1.30.1.1
#	mm/page_alloc.c	1.56.1.7 -> 1.61.1.1
#	drivers/acpi/tables.c	1.9.1.2 -> 1.13
#	arch/i386/kernel/Makefile	1.6.1.2 -> 1.8.1.1
#	arch/i386/kernel/mpparse.c	1.27.1.11 -> 1.36
#	arch/i386/kernel/setup.c	1.68.1.5 -> 1.77
#	fs/proc/array.c	1.10.1.1 -> 1.11.1.1
#	fs/proc/base.c	1.12.1.3 -> 1.18
#	drivers/acpi/Config.in	1.9.1.4 -> 1.16
#	Documentation/Configure.help	1.162.1.35 -> 1.177
#
diff -Nru a/Documentation/Configure.help b/Documentation/Configure.help
--- a/Documentation/Configure.help	Thu Oct  9 15:19:32 2003
+++ b/Documentation/Configure.help	Thu Oct  9 15:19:32 2003
@@ -18277,6 +18277,11 @@
   purpose port, say Y here. See .
 
+Support for serial ports defined in ACPI namespace
+CONFIG_SERIAL_ACPI
+  If you wish to enable serial port discovery via the ACPI
+  namespace, say Y here. If unsure, say N.
+
 Support for PowerMac serial ports
 CONFIG_MAC_SERIAL
   If you have Macintosh style serial ports (8 pin mini-DIN), say Y
diff -Nru a/drivers/acpi/tables.c b/drivers/acpi/tables.c
--- a/drivers/acpi/tables.c	Thu Oct  9 15:19:31 2003
+++ b/drivers/acpi/tables.c	Thu Oct  9 15:19:31 2003
@@ -262,10 +262,17 @@
 
 	/* Map the DSDT header via the pointer in the FADT */
 	if (id == ACPI_DSDT) {
-		struct acpi_table_fadt *fadt = (struct acpi_table_fadt *) *header;
+		struct fadt_descriptor_rev2 *fadt = (struct fadt_descriptor_rev2 *) *header;
+
+		if (fadt->header.revision == 3 && fadt->Xdsdt) {
+			*header = (void *) __acpi_map_table(fadt->Xdsdt,
+					sizeof(struct acpi_table_header));
+		} else if (fadt->V1_dsdt) {
+			*header = (void *) __acpi_map_table(fadt->V1_dsdt,
+					sizeof(struct acpi_table_header));
+		} else
+			*header = 0;
 
-		*header = (void *) __acpi_map_table(fadt->dsdt_addr,
-				sizeof(struct acpi_table_header));
 		if (!*header) {
 			printk(KERN_WARNING PREFIX "Unable to map DSDT\n");
 			return -ENODEV;
diff -Nru a/fs/proc/array.c b/fs/proc/array.c
--- a/fs/proc/array.c	Thu Oct  9 15:19:31 2003
+++ b/fs/proc/array.c	Thu Oct  9 15:19:31 2003
@@ -64,6 +64,7 @@
 #include <linux/highmem.h>
 #include <linux/file.h>
 #include <linux/times.h>
+#include <linux/hugetlb.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -490,6 +491,18 @@
 		pgd_t *pgd = pgd_offset(mm, vma->vm_start);
 		int pages = 0, shared = 0, dirty = 0, total = 0;
 
+		if (is_vm_hugetlb_page(vma)) {
+			int num_pages = ((vma->vm_end - vma->vm_start)/PAGE_SIZE);
+
+			resident += num_pages;
+			if (!(vma->vm_flags & VM_DONTCOPY))
+				share += num_pages;
+			if (vma->vm_flags & VM_WRITE)
+				dt += num_pages;
+			drs += num_pages;
+			vma = vma->vm_next;
+			continue;
+		}
 		statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total);
 		resident += pages;
 		share += shared;
diff -Nru a/fs/proc/base.c b/fs/proc/base.c
--- a/fs/proc/base.c	Thu Oct  9 15:19:31 2003
+++ b/fs/proc/base.c	Thu Oct  9 15:19:31 2003
@@ -476,7 +476,24 @@
 }
 #endif
 
+static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
+{
+	switch (orig) {
+	case 0:
+		file->f_pos = offset;
+		break;
+	case 1:
+		file->f_pos += offset;
+		break;
+	default:
+		return -EINVAL;
+	}
+	force_successful_syscall_return();
+	return file->f_pos;
+}
+
 static struct file_operations proc_mem_operations = {
+	llseek:		mem_lseek,
 	read:		mem_read,
 	write:		mem_write,
 	open:		mem_open,
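A note on the /proc/<pid>/mem change above: registering an explicit
mem_lseek lets a debugger seat the file offset directly on a target
virtual address, and the force_successful_syscall_return() call keeps
large offsets (which look like negative return values) from being
misreported as errors on architectures that flag syscall failure out of
band. The userspace sketch below is a hypothetical illustration, not
part of the patch; it assumes the caller has already attached with
ptrace and stopped the target, which the 2.4 mem_read() path requires.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

/* Read len bytes at virtual address vaddr in pid's address space. */
static ssize_t peek(pid_t pid, off_t vaddr, void *buf, size_t len)
{
	char path[64];
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "/proc/%d/mem", pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	/* SEEK_SET lands in "case 0" of mem_lseek() above. */
	if (lseek(fd, vaddr, SEEK_SET) == (off_t) -1) {
		close(fd);
		return -1;
	}
	n = read(fd, buf, len);		/* served by mem_read() */
	close(fd);
	return n;
}

int main(int argc, char **argv)
{
	long word;

	if (argc != 3)
		return 1;
	if (peek(atoi(argv[1]), strtoul(argv[2], NULL, 0), &word,
		 sizeof(word)) != sizeof(word))
		return 1;
	printf("0x%lx\n", word);
	return 0;
}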
diff -Nru a/include/linux/sysctl.h b/include/linux/sysctl.h
--- a/include/linux/sysctl.h	Thu Oct  9 15:19:31 2003
+++ b/include/linux/sysctl.h	Thu Oct  9 15:19:31 2003
@@ -154,6 +154,7 @@
 	VM_GFP_DEBUG=18,	/* debug GFP failures */
 	VM_CACHE_SCAN_RATIO=19,	/* part of the inactive cache list to scan */
 	VM_MAPPED_RATIO=20,	/* amount of unfreeable pages that triggers swapout */
+	VM_HUGETLB_PAGES=21,	/* int: Number of available Huge Pages */
 };
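The VM_HUGETLB_PAGES entry above only reserves the sysctl number; the
handler arrives with the accompanying hugepage changes. Assuming it is
exposed as /proc/sys/vm/nr_hugepages, the name mainline settled on, the
huge page pool can be resized from userspace as in this hypothetical
sketch (requires root and enough physically contiguous free memory):

#include <stdio.h>

static int set_nr_hugepages(int n)
{
	FILE *f = fopen("/proc/sys/vm/nr_hugepages", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", n);
	return fclose(f);
}

int main(void)
{
	/* Try to reserve 16 huge pages (4 MB each on i386 without PAE). */
	return set_nr_hugepages(16) ? 1 : 0;
}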
diff -Nru a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c	Thu Oct  9 15:19:31 2003
+++ b/mm/page_alloc.c	Thu Oct  9 15:19:31 2003
@@ -49,11 +49,11 @@
 /*
  * Temporary debugging check.
  */
-#define BAD_RANGE(zone, page)						\
-(									\
-	(((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size))	\
-	|| (((page) - mem_map) < (zone)->zone_start_mapnr)		\
-	|| ((zone) != page_zone(page))					\
+#define BAD_RANGE(zone, page)					\
+(								\
+	(((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size))	\
+	|| (((page) - mem_map) < (zone)->zone_start_mapnr)	\
+	|| ((zone) != page_zone(page))				\
 )
 
 /*
@@ -573,7 +573,7 @@
 	unsigned long nr, total, flags;
 
 	total = 0;
-	if (zone->size) {
+	if (zone->realsize) {
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
 			head = &(zone->free_area + order)->free_list;
@@ -605,13 +605,44 @@
 /*
  * Builds allocation fallback zone lists.
  */
-static inline void build_zonelists(pg_data_t *pgdat)
+static int __init build_zonelists_node(pg_data_t *pgdat, zonelist_t *zonelist, int j, int k)
 {
-	int i, j, k;
+	zone_t *zone;
+	switch (k) {
+		default:
+			BUG();
+		/*
+		 * fallthrough:
+		 */
+		case ZONE_HIGHMEM:
+			zone = pgdat->node_zones + ZONE_HIGHMEM;
+			if (zone->realsize) {
+#ifndef CONFIG_HIGHMEM
+				BUG();
+#endif
+				zonelist->zones[j++] = zone;
+			}
+		case ZONE_NORMAL:
+			zone = pgdat->node_zones + ZONE_NORMAL;
+			if (zone->realsize)
+				zonelist->zones[j++] = zone;
+		case ZONE_DMA:
+			zone = pgdat->node_zones + ZONE_DMA;
+			if (zone->realsize)
+				zonelist->zones[j++] = zone;
+	}
+
+	return j;
+}
+
+static void __init build_zonelists(pg_data_t *pgdat)
+{
+	int i, j, k, node, local_node;
 
+	local_node = pgdat->node_id;
+	printk("Building zonelist for node: %d\n", local_node);
 	for (i = 0; i <= GFP_ZONEMASK; i++) {
 		zonelist_t *zonelist;
-		zone_t *zone;
 
 		zonelist = pgdat->node_zonelists + i;
 		memset(zonelist, 0, sizeof(*zonelist));
@@ -623,33 +654,32 @@
 		if (i & __GFP_DMA)
 			k = ZONE_DMA;
 
-		switch (k) {
-			default:
-				BUG();
-			/*
-			 * fallthrough:
-			 */
-			case ZONE_HIGHMEM:
-				zone = pgdat->node_zones + ZONE_HIGHMEM;
-				if (zone->size) {
-#ifndef CONFIG_HIGHMEM
-					BUG();
-#endif
-					zonelist->zones[j++] = zone;
-				}
-			case ZONE_NORMAL:
-				zone = pgdat->node_zones + ZONE_NORMAL;
-				if (zone->size)
-					zonelist->zones[j++] = zone;
-			case ZONE_DMA:
-				zone = pgdat->node_zones + ZONE_DMA;
-				if (zone->size)
-					zonelist->zones[j++] = zone;
-		}
+		j = build_zonelists_node(pgdat, zonelist, j, k);
+		/*
+		 * Now we build the zonelist so that it contains the zones
+		 * of all the other nodes.
+		 * We don't want to pressure a particular node, so when
+		 * building the zones for node N, we make sure that the
+		 * zones coming right after the local ones are those from
+		 * node N+1 (modulo N)
+		 */
+		for (node = local_node + 1; node < numnodes; node++)
+			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+		for (node = 0; node < local_node; node++)
+			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+
 		zonelist->zones[j++] = NULL;
 	}
 }
 
+void __init build_all_zonelists(void)
+{
+	int i;
+
+	for (i = 0; i < numnodes; i++)
+		build_zonelists(NODE_DATA(i));
+}
+
 /*
  * Helper functions to size the waitqueue hash table.
  * Essentially these want to choose hash table sizes sufficiently
@@ -692,6 +722,31 @@
 	return ffz(~size);
 }
 
+static unsigned long memmap_init(struct page *start, struct page *end,
+				 int zone, unsigned long start_paddr, int highmem)
+{
+	struct page *page;
+
+	for (page = start; page < end; page++) {
+		set_page_zone(page, zone);
+		set_page_count(page, 0);
+		SetPageReserved(page);
+		INIT_LIST_HEAD(&page->list);
+		if (!highmem)
+			set_page_address(page, __va(start_paddr));
+		start_paddr += PAGE_SIZE;
+	}
+	return start_paddr;
+}
+
+#ifdef HAVE_ARCH_MEMMAP_INIT
+#define MEMMAP_INIT(start, end, zone, paddr, highmem) \
+	arch_memmap_init(memmap_init, start, end, zone, paddr, highmem)
+#else
+#define MEMMAP_INIT(start, end, zone, paddr, highmem) \
+	memmap_init(start, end, zone, paddr, highmem)
+#endif
+
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
 /*
@@ -713,10 +768,8 @@
 		BUG();
 
 	totalpages = 0;
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		unsigned long size = zones_size[i];
-		totalpages += size;
-	}
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		totalpages += zones_size[i];
 	realtotalpages = totalpages;
 	if (zholes_size)
 		for (i = 0; i < MAX_NR_ZONES; i++)
@@ -725,7 +778,7 @@
 	printk("On node %d totalpages: %lu\n", nid, realtotalpages);
 
 	/*
-	 * Some architectures (with lots of mem and discontinous memory
+	 * Some architectures (with lots of mem and discontiguous memory
 	 * maps) have to search for a good mem_map area:
 	 * For discontigmem, the conceptual mem map array starts from
 	 * PAGE_OFFSET, we need to align the actual array onto a mem map
@@ -738,7 +791,7 @@
 			MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
 	}
 	*gmap = pgdat->node_mem_map = lmem_map;
-	pgdat->node_size = totalpages;
+	pgdat->node_size = 0;
 	pgdat->node_start_paddr = zone_start_paddr;
 	pgdat->node_start_mapnr = (lmem_map - mem_map);
 	pgdat->nr_zones = 0;
@@ -755,7 +808,7 @@
 		if (zholes_size)
 			realsize -= zholes_size[j];
 
-		printk("zone(%lu): %lu pages.\n", j, size);
+		printk("zone(%lu): %lu pages.\n", j, realsize);
 		zone->size = size;
 		zone->realsize = realsize;
 		zone->name = zone_names[j];
@@ -766,6 +819,7 @@
 		zone->nr_active_pages = zone->nr_inactive_pages = 0;
 
+		pgdat->node_size += realsize;
 		if (!size)
 			continue;
@@ -826,16 +880,10 @@
 		 * up by free_all_bootmem() once the early boot process is
 		 * done. Non-atomic initialization, single-pass.
 		 */
-		for (i = 0; i < size; i++) {
-			struct page *page = mem_map + offset + i;
-			set_page_zone(page, nid * MAX_NR_ZONES + j);
-			set_page_count(page, 0);
-			SetPageReserved(page);
-			INIT_LIST_HEAD(&page->list);
-			if (j != ZONE_HIGHMEM)
-				set_page_address(page, __va(zone_start_paddr));
-			zone_start_paddr += PAGE_SIZE;
-		}
+		zone_start_paddr = MEMMAP_INIT(mem_map + offset,
+					mem_map + offset + size,
+					nid * MAX_NR_ZONES + j, zone_start_paddr,
+					(j == ZONE_HIGHMEM ? 1 : 0));
 		offset += size;
 		for (i = 0; ; i++) {
@@ -876,7 +924,6 @@
 			  (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
 		}
 	}
-	build_zonelists(pgdat);
 }
 
 void __init free_area_init(unsigned long *zones_size)
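With build_zonelists() now run once per node from build_all_zonelists(),
each node's fallback list begins with its own zones and then walks the
other nodes in a rotation, exactly as the comment in the hunk above
describes. The hypothetical userspace sketch below, not part of the
patch, just prints the node order those two for-loops yield; with four
nodes, node 2 falls back in the order 2, 3, 0, 1:

#include <stdio.h>

#define NUMNODES 4	/* stand-in for the kernel's numnodes */

int main(void)
{
	int local_node, node;

	for (local_node = 0; local_node < NUMNODES; local_node++) {
		printf("node %d:", local_node);
		/* local zones come first ... */
		printf(" %d", local_node);
		/* ... then the same rotation as build_zonelists() */
		for (node = local_node + 1; node < NUMNODES; node++)
			printf(" %d", node);
		for (node = 0; node < local_node; node++)
			printf(" %d", node);
		printf("\n");
	}
	return 0;
}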
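The MEMMAP_INIT() indirection exists so that an architecture whose
mem_map contains holes can interpose on struct page initialization
while reusing the generic loop. Sketched below, under the assumption
that a port mirrors the macro's signature, is what such a hook could
look like in an arch header; the names are illustrative only:

/* In the port's <asm/pgtable.h> (hypothetical): */
#define HAVE_ARCH_MEMMAP_INIT

/*
 * The generic memmap_init() is handed in as 'gen_init'; a real port
 * would skip or special-case unmapped stretches of mem_map before
 * delegating to it.
 */
#define arch_memmap_init(gen_init, start, end, zone, paddr, highmem)	\
	gen_init(start, end, zone, paddr, highmem)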